cpu_clear(cpu, cpu_idle_map);
rmb();
- if (cpu_is_offline(cpu))
+ if (cpu_is_offline(cpu)) {
+#if defined(CONFIG_XEN) && defined(CONFIG_HOTPLUG_CPU)
+ /* Tell hypervisor to take vcpu down. */
+ HYPERVISOR_vcpu_down(cpu);
+#endif
play_dead();
+ }
irq_stat[cpu].idle_timestamp = jiffies;
xen_idle();
}
#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_XEN
+ /* Tell hypervisor to bring vcpu up. */
+ HYPERVISOR_vcpu_up(cpu);
+#endif
/* Already up, and in cpu_quiescent now? */
if (cpu_isset(cpu, smp_commenced_mask)) {
cpu_enable(cpu);
return ret;
}
+static inline int
+HYPERVISOR_vcpu_down(
+ int vcpu)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_vcpu_down | (vcpu << SCHEDOP_vcpushift))
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_vcpu_up(
+ int vcpu)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_vcpu_up | (vcpu << SCHEDOP_vcpushift))
+ : "memory" );
+
+ return ret;
+}
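+
+/*
+ * Usage sketch (see the callers added elsewhere in this patch): the idle
+ * loop issues HYPERVISOR_vcpu_down() for the CPU going offline before
+ * play_dead(), and the CPU-hotplug online path issues HYPERVISOR_vcpu_up()
+ * when the CPU is brought back.  The return value is whatever Xen's
+ * sched_op handler gives back for the new sub-ops: 0 on success, -EINVAL
+ * for an out-of-range VCPU, -ESRCH if that VCPU does not exist.
+ */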
#endif /* __HYPERCALL_H__ */
* - domain is marked as paused or blocked only if all its vcpus
* are paused or blocked
* - domain is marked as running if any of its vcpus is running
+ * - only map vcpus that aren't down. Note: at some point we may wish
+ *   to demux the -1 value to indicate down vs. not-ever-booted.
*/
for_each_vcpu ( d, v ) {
- op->u.getdomaininfo.vcpu_to_cpu[v->vcpu_id] = v->processor;
+ /* only map vcpus that are up */
+ if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
+ op->u.getdomaininfo.vcpu_to_cpu[v->vcpu_id] = v->processor;
op->u.getdomaininfo.cpumap[v->vcpu_id] = v->cpumap;
if ( !(v->vcpu_flags & VCPUF_ctrl_pause) )
flags &= ~DOMFLAGS_PAUSED;
struct vcpu_guest_context *c;
struct domain *d;
struct vcpu *v;
+ int i;
d = find_domain_by_id(op->u.getvcpucontext.domain);
if ( d == NULL )
put_domain(d);
break;
}
+
+ /* Find the first valid (up) vcpu at or after the requested index. */
+ v = NULL;
+ for ( i = op->u.getvcpucontext.vcpu; i < MAX_VIRT_CPUS; i++ )
+ {
+ v = d->vcpu[i];
+ if ( v != NULL && !test_bit(_VCPUF_down, &v->vcpu_flags) )
+ break;
+ v = NULL; /* candidate is missing or down; keep searching */
+ }
- v = d->vcpu[op->u.getvcpucontext.vcpu];
if ( v == NULL )
{
ret = -ESRCH;
return 0;
}
+/* Mark target vcpu as non-runnable so it is not scheduled */
+static long do_vcpu_down(int vcpu)
+{
+ struct vcpu *target;
+
+ if ( vcpu >= MAX_VIRT_CPUS )
+ return -EINVAL;
+
+ target = current->domain->vcpu[vcpu];
+ if ( target == NULL )
+ return -ESRCH;
+ set_bit(_VCPUF_down, &target->vcpu_flags);
+
+ return 0;
+}
+
+/* Mark target vcpu as runnable and wake it */
+static long do_vcpu_up(int vcpu)
+{
+ struct vcpu *target;
+
+ if ( vcpu >= MAX_VIRT_CPUS )
+ return -EINVAL;
+
+ target = current->domain->vcpu[vcpu];
+ if ( target == NULL )
+ return -ESRCH;
+ clear_bit(_VCPUF_down, &target->vcpu_flags);
+ /* wake vcpu */
+ domain_wake(target);
+
+ return 0;
+}
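+
+/*
+ * Note: both helpers act on current->domain, i.e. a guest can only take
+ * its own VCPUs down or bring them back up via these sub-commands.
+ */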
+
/*
* Demultiplex scheduler-related hypercalls.
*/
domain_shutdown((u8)(op >> SCHEDOP_reasonshift));
break;
}
+ case SCHEDOP_vcpu_down:
+ {
+ ret = do_vcpu_down((int)(op >> SCHEDOP_vcpushift));
+ break;
+ }
+ case SCHEDOP_vcpu_up:
+ {
+ ret = do_vcpu_up((int)(op >> SCHEDOP_vcpushift));
+ break;
+ }
default:
ret = -ENOSYS;
#define __HYPERVISOR_boot_vcpu 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
-#define __HYPERVISOR_policy_op 27
+#define __HYPERVISOR_policy_op 27
/*
* VIRTUAL INTERRUPTS
#define SCHEDOP_yield 0 /* Give up the CPU voluntarily. */
#define SCHEDOP_block 1 /* Block until an event is received. */
#define SCHEDOP_shutdown 2 /* Stop executing this domain. */
+#define SCHEDOP_vcpu_down 3 /* Make target VCPU non-runnable. */
+#define SCHEDOP_vcpu_up 4 /* Make target VCPU runnable. */
#define SCHEDOP_cmdmask 255 /* 8-bit command. */
#define SCHEDOP_reasonshift 8 /* 8-bit reason code. (SCHEDOP_shutdown) */
+#define SCHEDOP_vcpushift 8 /* 8-bit VCPU target. (SCHEDOP_up|down) */
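+
+/*
+ * Encoding sketch for the new sub-ops (this is what the guest-side
+ * wrappers in this patch emit): the low 8 bits select the command and
+ * the next 8 bits carry the target VCPU, e.g.
+ *   op = SCHEDOP_vcpu_down | (3 << SCHEDOP_vcpushift)   targets VCPU 3.
+ */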
/*
* Reason codes for SCHEDOP_shutdown. These may be interpreted by control
/* Initialization completed. */
#define _VCPUF_initialised 8
#define VCPUF_initialised (1UL<<_VCPUF_initialised)
+ /* VCPU is not runnable. */
+#define _VCPUF_down 9
+#define VCPUF_down (1UL<<_VCPUF_down)
/*
* Per-domain flags (domain_flags).
static inline int domain_runnable(struct vcpu *v)
{
return ( (atomic_read(&v->pausecnt) == 0) &&
- !(v->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause)) &&
+ !(v->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause|VCPUF_down)) &&
!(v->domain->domain_flags & (DOMF_shutdown|DOMF_shuttingdown)) );
}
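+
+/*
+ * With VCPUF_down included above, a VCPU taken down via SCHEDOP_vcpu_down
+ * is never chosen to run until SCHEDOP_vcpu_up clears the flag and
+ * domain_wake() makes it schedulable again.
+ */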